SMP IPI support.
#if 0
#include <mach_apic.h>
#endif
+#include <asm-xen/evtchn.h>
#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
return SET_APIC_DEST_FIELD(mask);
}
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-#if 1
- xxprint("__send_IPI_shortcut\n");
-#else
- /*
- * Subtle. In the case of the 'never do double writes' workaround
- * we have to lock out interrupts to be safe. As we don't care
- * of the value read we use an atomic rmw access to avoid costly
- * cli/sti. Otherwise we use an even cheaper single atomic write
- * to the APIC.
- */
- unsigned int cfg;
+DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
-
- /*
- * No need to touch the target chip field
- */
- cfg = __prepare_ICR(shortcut, vector);
+/*
+ * Deliver IPI 'vector' to 'cpu' by notifying the event channel bound for
+ * that (cpu, vector) pair (see bind_ipi_on_cpu_to_irq()).
+ */
+static inline void __send_IPI_one(unsigned int cpu, int vector)
+{
+	unsigned int evtchn;
+
+	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+	/* printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn); */
+	if (evtchn) {
+		shared_info_t *s = HYPERVISOR_shared_info;
+		/*
+		 * Wait until any previous notification on this channel has
+		 * been consumed and the channel is unmasked before poking it
+		 * again.  NOTE(review): unbounded busy-wait with no
+		 * cpu_relax()/timeout -- confirm the target VCPU always
+		 * drains and unmasks its IPI channels.
+		 */
+		while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
+		       synch_test_bit(evtchn, &s->evtchn_mask[0]))
+			;
+		notify_via_evtchn(evtchn);
+	} else
+		printk("send_IPI to unbound port %d/%d\n",
+		       cpu, vector);
+}
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write_around(APIC_ICR, cfg);
-#endif
+/*
+ * Emulate the APIC "shortcut" destinations over Xen event channels:
+ * APIC_DEST_SELF notifies only the current CPU, APIC_DEST_ALLBUT notifies
+ * every online CPU except the sender.  Any other shortcut is unsupported
+ * here and is merely logged.
+ */
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
+{
+	int cpu;
+
+	switch (shortcut) {
+	case APIC_DEST_SELF:
+		__send_IPI_one(smp_processor_id(), vector);
+		break;
+	case APIC_DEST_ALLBUT:
+		/* Walk all possible CPUs; only online ones get the IPI. */
+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+			if (cpu == smp_processor_id())
+				continue;
+			if (cpu_isset(cpu, cpu_online_map)) {
+				__send_IPI_one(cpu, vector);
+			}
+		}
+		break;
+	default:
+		/* Not implemented for event channels -- log and ignore. */
+		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
+			vector);
+		break;
+	}
 }
void fastcall send_IPI_self(int vector)
/*
* This is only used on smaller machines.
*/
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+/*
+ * Send 'vector' as an IPI to every CPU set in 'mask', one event-channel
+ * notification per target CPU.  IRQs are disabled around the walk.
+ */
+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
 {
-#if 1
-	xxprint("send_IPI_mask_bitmask\n");
-	dump_stack();
-#else
-	unsigned long mask = cpus_addr(cpumask)[0];
-	unsigned long cfg;
 	unsigned long flags;
+	unsigned int cpu;
 	local_irq_save(flags);
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-
-	/*
-	 * prepare target chip field
-	 */
-	cfg = __prepare_ICR2(mask);
-	apic_write_around(APIC_ICR2, cfg);
-
-	/*
-	 * program the ICR
-	 */
-	cfg = __prepare_ICR(0, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write_around(APIC_ICR, cfg);
+
+	/* No APIC broadcast under Xen: unicast to each CPU in the mask. */
+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+		if (cpu_isset(cpu, mask)) {
+			__send_IPI_one(cpu, vector);
+		}
+	}
 	local_irq_restore(flags);
-#endif
 }
 inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 {
-#if 1
-	xxprint("send_IPI_mask_sequence\n");
-#else
-	unsigned long cfg, flags;
-	unsigned int query_cpu;
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-	 * should be modified to do 1 message per cluster ID - mbligh
-	 */
-
-	local_irq_save(flags);
-
-	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
-		if (cpu_isset(query_cpu, mask)) {
-
-			/*
-			 * Wait for idle.
-			 */
-			apic_wait_icr_idle();
-
-			/*
-			 * prepare target chip field
-			 */
-			cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
-			apic_write_around(APIC_ICR2, cfg);
-
-			/*
-			 * program the ICR
-			 */
-			cfg = __prepare_ICR(0, vector);
-
-			/*
-			 * Send the IPI. The write to APIC_ICR fires this off.
-			 */
-			apic_write_around(APIC_ICR, cfg);
-		}
-	}
-	local_irq_restore(flags);
-#endif
+	/*
+	 * With event channels every IPI is already a per-CPU unicast, so
+	 * the "sequence" and "bitmask" variants are identical.
+	 */
+	send_IPI_mask_bitmask(mask, vector);
 }
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
* 2) Leave the mm if we are in the lazy tlb mode.
*/
-asmlinkage void smp_invalidate_interrupt (void)
+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
{
unsigned long cpu;
} else
leave_mm(cpu);
}
-#if 1
- xxprint("smp_invalidate_interrupt ack_APIC_irq\n");
-#else
- ack_APIC_irq();
-#endif
+ xxprint("smp_invalidate_interrupt\n");
smp_mb__before_clear_bit();
cpu_clear(cpu, flush_cpumask);
smp_mb__after_clear_bit();
out:
put_cpu_no_resched();
+
+ return IRQ_HANDLED;
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
/* Wait for response */
while (atomic_read(&data.started) != cpus)
barrier();
-
if (wait)
while (atomic_read(&data.finished) != cpus)
barrier();
+
spin_unlock(&call_lock);
return 0;
* all the work is done automatically when
* we return from the interrupt.
*/
-asmlinkage void smp_reschedule_interrupt(void)
+/*
+ * Reschedule IPI handler.  Deliberately empty: the reschedule happens
+ * automatically on return from the interrupt (see comment above); the
+ * event channel replaces the APIC, so there is no ack_APIC_irq() either.
+ */
+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
+				     struct pt_regs *regs)
 {
-#if 1
-	xxprint("smp_reschedule_interrupt: ack_APIC_irq\n");
-#else
-	ack_APIC_irq();
-#endif
+
+	return IRQ_HANDLED;
 }
-asmlinkage void smp_call_function_interrupt(void)
+#include <linux/kallsyms.h>
+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
int wait = call_data->wait;
-#if 1
- xxprint("smp_call_function_interrupt: ack_APIC_irq\n");
-#else
- ack_APIC_irq();
-#endif
/*
* Notify initiating CPU that I've grabbed the data and am
* about to execute the function
mb();
atomic_inc(&call_data->finished);
}
+
+ return IRQ_HANDLED;
}
 void local_setup_debug(void)
 {
-	int time_irq;
-
-	time_irq = bind_virq_to_irq(VIRQ_DEBUG);
-	(void)setup_irq(time_irq, &local_irq_debug);
+	/*
+	 * Bind VIRQ_DEBUG to an IRQ and install its handler in one step.
+	 * NOTE(review): both return values are ignored -- confirm
+	 * bind_virq_to_irq() cannot fail here.
+	 */
+	(void)setup_irq(bind_virq_to_irq(VIRQ_DEBUG), &local_irq_debug);
 }
setup_misdirect_virq();
local_setup_timer();
local_setup_debug(); /* XXX */
+ smp_intr_init();
local_irq_enable();
/*
* low-memory mappings have been cleared, flush them from
local_flush_tlb();
cpu_set(smp_processor_id(), cpu_online_map);
wmb();
- if (01) {
+ if (0) {
char *msg2 = "delay2\n";
int timeout;
for (timeout = 0; timeout < 50000; timeout++) {
return;
}
+ smp_intr_init();
+
#if 0
connect_bsp_APIC();
setup_local_APIC();
#endif
}
-void __init smp_intr_init(void)
-{
-#if 1
-	xxprint("smp_intr_init\n");
-#else
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-	/*
-	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-	 * IPI, driven by wakeup.
-	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+/*
+ * irqaction tables for the three IPIs.  NOTE(review): positional
+ * initializers -- order must match struct irqaction (handler, flags,
+ * mask, name, dev_id, next); confirm against the kernel version in use.
+ */
+static struct irqaction reschedule_irq = {
+	smp_reschedule_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "reschedule",
+	NULL, NULL
+};
-	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+extern irqreturn_t smp_invalidate_interrupt(int, void *, struct pt_regs *);
-	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-#endif
+static struct irqaction invalidate_irq = {
+	smp_invalidate_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "invalidate",
+	NULL, NULL
+};
+
+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+
+static struct irqaction call_function_irq = {
+	smp_call_function_interrupt, SA_INTERRUPT, CPU_MASK_NONE,
+	"call_function", NULL, NULL
+};
+
+/*
+ * Replace the APIC intr-gate setup: bind each IPI on the calling CPU to
+ * an event-channel-backed IRQ and install its handler.  Called once per
+ * CPU during bringup.
+ */
+void __init smp_intr_init(void)
+{
+
+	(void)setup_irq(
+		bind_ipi_on_cpu_to_irq(smp_processor_id(), RESCHEDULE_VECTOR),
+		&reschedule_irq);
+	(void)setup_irq(
+		bind_ipi_on_cpu_to_irq(smp_processor_id(), INVALIDATE_TLB_VECTOR),
+		&invalidate_irq);
+	(void)setup_irq(
+		bind_ipi_on_cpu_to_irq(smp_processor_id(), CALL_FUNCTION_VECTOR),
+		&call_function_irq);
 }
/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
-#define NR_IPIS 8
-/* IRQ <-> IPI mapping. */
+/* evtchn <-> IPI mapping. */
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
/* Reference counts for bindings to IRQs. */
spin_unlock(&irq_mapping_update_lock);
}
-void bind_ipi_on_cpu(int cpu, int ipi)
+/*
+ * Bind IPI number 'ipi' on 'cpu' to a Linux IRQ, creating the Xen event
+ * channel on first use.  Returns the IRQ; every call takes a reference
+ * in irq_bindcount[], dropped by unbind_ipi_on_cpu_from_irq().
+ */
+int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
 {
 	evtchn_op_t op;
+	int evtchn, irq;
 	spin_lock(&irq_mapping_update_lock);
-	if (per_cpu(ipi_to_evtchn, cpu)[ipi] == 0) {
+	if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
+	{
+		/* First binding: ask Xen for an IPI event channel... */
 		op.cmd = EVTCHNOP_bind_ipi;
 		op.u.bind_ipi.ipi_edom = cpu;
 		if ( HYPERVISOR_event_channel_op(&op) != 0 )
 			panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
+		evtchn = op.u.bind_ipi.port;
-		per_cpu(ipi_to_evtchn, cpu)[ipi] = op.u.bind_ipi.port;
-	}
+		/* ...then wire it to a free IRQ in both direction maps. */
+		irq = find_unbound_irq();
+		evtchn_to_irq[evtchn] = irq;
+		irq_to_evtchn[irq] = evtchn;
+
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+	} else
+		irq = evtchn_to_irq[evtchn];
+
+	irq_bindcount[irq]++;
 	spin_unlock(&irq_mapping_update_lock);
+
+	return irq;
 }
-void unbind_ipi_on_cpu(int cpu, int ipi)
+/*
+ * Drop one reference on the IRQ bound to IPI 'ipi' of 'cpu'; when the
+ * last reference goes, close the event channel and clear the mappings.
+ * NOTE(review): evtchn/irq are read before irq_mapping_update_lock is
+ * taken -- confirm no concurrent bind/unbind can race with the lookup.
+ */
+void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
 {
 	evtchn_op_t op;
 	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+	/* evtchn -> irq lives in evtchn_to_irq[]; irq_to_evtchn[] is the
+	 * inverse map and must not be indexed by an event-channel number. */
+	int irq = evtchn_to_irq[evtchn];
 	spin_lock(&irq_mapping_update_lock);
-	op.cmd = EVTCHNOP_close;
-	op.u.close.dom = DOMID_SELF;
-	op.u.close.port = evtchn;
-	if ( HYPERVISOR_event_channel_op(&op) != 0 )
-		panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
+	if ( --irq_bindcount[irq] == 0 )
+	{
+		op.cmd = EVTCHNOP_close;
+		op.u.close.dom = DOMID_SELF;
+		op.u.close.port = evtchn;
+		if ( HYPERVISOR_event_channel_op(&op) != 0 )
+			panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
-	per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+		evtchn_to_irq[evtchn] = -1;
+		irq_to_evtchn[irq]    = -1;
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+	}
 	spin_unlock(&irq_mapping_update_lock);
 }
spin_lock_init(&irq_mapping_update_lock);
- /* No VIRQ -> IRQ mappings. */
- for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+ for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
+ /* No VIRQ -> IRQ mappings. */
for ( i = 0; i < NR_VIRQS; i++ )
per_cpu(virq_to_irq, cpu)[i] = -1;
+ }
/* No event-channel -> IRQ mappings. */
for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
* Vectors 0x20-0x2f are used for ISA interrupts.
*/
+#if 0
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
*
* sources per level' errata.
*/
#define LOCAL_TIMER_VECTOR 0xef
+#endif
/*
* First APIC vector available to drivers: (vectors 0x30-0xee)
#define FIRST_DEVICE_VECTOR 0x31
#define FIRST_SYSTEM_VECTOR 0xef
-/* #define TIMER_IRQ _EVENT_TIMER */
-
/*
* 16 8259A IRQ's, 208 potential APIC interrupt sources.
* Right now the APIC is mostly only used for SMP.
* the usable vector space is 0x20-0xff (224 vectors)
*/
+/*
+ * Xen: IPIs are delivered over per-CPU event channels, not APIC vectors.
+ * These small indices select entries in the per-CPU ipi_to_evtchn[NR_IPIS]
+ * table (index 0 is unused so 0 can mean "unbound").
+ */
+#define NR_IPIS 8
+
+#define RESCHEDULE_VECTOR	1
+#define INVALIDATE_TLB_VECTOR	2
+#define CALL_FUNCTION_VECTOR	3
+
/*
* The maximum number of vectors supported by i386 processors
* is limited to 256. For processors other than i386, NR_VECTORS
/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
extern int bind_virq_to_irq(int virq);
extern void unbind_virq_from_irq(int virq);
-extern void bind_ipi_on_cpu(int cpu, int ipi);
-extern void unbind_ipi_on_cpu(int cpu, int ipi);
+extern int bind_ipi_on_cpu_to_irq(int cpu, int ipi);
+extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
extern int bind_evtchn_to_irq(int evtchn);
extern void unbind_evtchn_from_irq(int evtchn);
/*
* The use of 'barrier' in the following reflects their use as local-lock
* operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operatiosn must complete
+ * critical operations are executed. All critical operations must complete
* /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
* includes these barriers, for example.
*/
+/*
+ * SMP conversion of the Xen local-IRQ macros: each one now operates on
+ * the *current* VCPU's upcall mask (vcpu_data[smp_processor_id()])
+ * instead of the hard-coded vcpu_data[0].  The barrier() calls act as
+ * local locks: reentrancy must be blocked before the critical write and
+ * released only after it.  NOTE(review): smp_processor_id() is evaluated
+ * inside the macros -- confirm the caller cannot migrate CPUs mid-macro.
+ */
-#define __cli()                                                               \
-do {                                                                          \
-	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;          \
-	barrier();                                                            \
+#define __cli()                                                               \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	_vcpu->evtchn_upcall_mask = 1;                                        \
+	barrier();                                                            \
 } while (0)
-#define __sti()                                                               \
-do {                                                                          \
-	shared_info_t *_shared = HYPERVISOR_shared_info;                      \
-	barrier();                                                            \
-	_shared->vcpu_data[0].evtchn_upcall_mask = 0;                         \
-	barrier(); /* unmask then check (avoid races) */                      \
-	if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
-		force_evtchn_callback();                                      \
+#define __sti()                                                               \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	barrier();                                                            \
+	_vcpu->evtchn_upcall_mask = 0;                                        \
+	barrier(); /* unmask then check (avoid races) */                      \
+	if ( unlikely(_vcpu->evtchn_upcall_pending) )                         \
+		force_evtchn_callback();                                      \
 } while (0)
-#define __save_flags(x)                                                       \
-do {                                                                          \
-	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;        \
+#define __save_flags(x)                                                       \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	(x) = _vcpu->evtchn_upcall_mask;                                      \
 } while (0)
-#define __restore_flags(x)                                                    \
-do {                                                                          \
-	shared_info_t *_shared = HYPERVISOR_shared_info;                      \
-	barrier();                                                            \
-	if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {        \
-		barrier(); /* unmask then check (avoid races) */              \
-		if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )  \
-			force_evtchn_callback();                              \
-	}                                                                     \
+#define __restore_flags(x)                                                    \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	barrier();                                                            \
+	if ( (_vcpu->evtchn_upcall_mask = (x)) == 0 ) {                       \
+		barrier(); /* unmask then check (avoid races) */              \
+		if ( unlikely(_vcpu->evtchn_upcall_pending) )                 \
+			force_evtchn_callback();                              \
+	}                                                                     \
 } while (0)
-#define safe_halt()		((void)0)
+#define safe_halt()		((void)0)
-#define __save_and_cli(x)                                                     \
-do {                                                                          \
-	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;        \
-	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;          \
-	barrier();                                                            \
+#define __save_and_cli(x)                                                     \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	(x) = _vcpu->evtchn_upcall_mask;                                      \
+	_vcpu->evtchn_upcall_mask = 1;                                        \
+	barrier();                                                            \
 } while (0)
-#define __save_and_sti(x)                                                     \
-do {                                                                          \
-	shared_info_t *_shared = HYPERVISOR_shared_info;                      \
-	barrier();                                                            \
-	(x) = _shared->vcpu_data[0].evtchn_upcall_mask;                       \
-	_shared->vcpu_data[0].evtchn_upcall_mask = 0;                         \
-	barrier(); /* unmask then check (avoid races) */                      \
-	if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
-		force_evtchn_callback();                                      \
+#define __save_and_sti(x)                                                     \
+do {                                                                          \
+	vcpu_info_t *_vcpu =                                                  \
+		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];       \
+	barrier();                                                            \
+	(x) = _vcpu->evtchn_upcall_mask;                                      \
+	_vcpu->evtchn_upcall_mask = 0;                                        \
+	barrier(); /* unmask then check (avoid races) */                      \
+	if ( unlikely(_vcpu->evtchn_upcall_pending) )                         \
+		force_evtchn_callback();                                      \
 } while (0)
 #define local_irq_save(x)	__save_and_cli(x)
-#define local_irq_restore(x)	__restore_flags(x)
-#define local_save_flags(x)	__save_flags(x)
-#define local_irq_disable()	__cli()
-#define local_irq_enable()	__sti()
+#define local_irq_restore(x)	__restore_flags(x)
+#define local_save_flags(x)	__save_flags(x)
+#define local_irq_disable()	__cli()
+#define local_irq_enable()	__sti()
 #define irqs_disabled()                                                       \
-	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+	HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
/*
* disable hlt during certain critical i/o operations
#define INIT_EVENT_CHANNELS 16
#define MAX_EVENT_CHANNELS 1024
+#define EVENT_CHANNELS_SPREAD 32
-static int get_free_port(struct domain *d)
+static int get_free_port(struct exec_domain *ed)
{
+ struct domain *d = ed->domain;
int max, port;
event_channel_t *chn;
max = d->max_event_channel;
chn = d->event_channel;
- for ( port = 0; port < max; port++ )
+ for ( port = ed->eid * EVENT_CHANNELS_SPREAD; port < max; port++ )
if ( chn[port].state == ECS_FREE )
break;
- if ( port == max )
+ if ( port >= max )
{
if ( max == MAX_EVENT_CHANNELS )
return -ENOSPC;
- max *= 2;
+ max = port + EVENT_CHANNELS_SPREAD;
chn = xmalloc(max * sizeof(event_channel_t));
if ( unlikely(chn == NULL) )
if ( d->event_channel != NULL )
{
- memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
+ memcpy(chn, d->event_channel, d->max_event_channel *
+ sizeof(event_channel_t));
xfree(d->event_channel);
}
spin_lock(&d->event_channel_lock);
- if ( (port = get_free_port(d)) >= 0 )
+ if ( (port = get_free_port(current)) >= 0 )
{
d->event_channel[port].state = ECS_UNBOUND;
d->event_channel[port].u.unbound.remote_domid = alloc->dom;
/* Obtain, or ensure that we already have, a valid <port1>. */
if ( port1 == 0 )
{
- if ( (port1 = get_free_port(d1)) < 0 )
+ if ( (port1 = get_free_port(ed1)) < 0 )
ERROR_EXIT(port1);
}
else if ( port1 >= d1->max_event_channel )
/* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
u16 tmp = d1->event_channel[port1].state;
d1->event_channel[port1].state = ECS_INTERDOMAIN;
- port2 = get_free_port(d2);
+ port2 = get_free_port(ed2);
d1->event_channel[port1].state = tmp;
if ( port2 < 0 )
ERROR_EXIT(port2);
*/
if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
(virq == VIRQ_MISDIRECT) ||
- ((port = get_free_port(d)) < 0) )
+ ((port = get_free_port(ed)) < 0) )
goto out;
d->event_channel[port].state = ECS_VIRQ;
spin_lock(&d->event_channel_lock);
- if ( (port = get_free_port(d)) >= 0 )
+ if ( (port = get_free_port(ed)) >= 0 )
{
d->event_channel[port].state = ECS_IPI;
d->event_channel[port].u.ipi_edom = ipi_edom;
spin_lock(&d->event_channel_lock);
if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
- ((rc = port = get_free_port(d)) < 0) )
+ ((rc = port = get_free_port(current)) < 0) )
goto out;
d->pirq_to_evtchn[pirq] = port;
{
struct domain *ld = current->domain;
struct exec_domain *rd;
- int rport;
+ int rport, ret = 0;
spin_lock(&ld->event_channel_lock);
if ( unlikely(lport < 0) ||
- unlikely(lport >= ld->max_event_channel) ||
- unlikely(ld->event_channel[lport].state != ECS_INTERDOMAIN) )
+ unlikely(lport >= ld->max_event_channel))
{
spin_unlock(&ld->event_channel_lock);
return -EINVAL;
}
- rd = ld->event_channel[lport].u.interdomain.remote_dom;
- rport = ld->event_channel[lport].u.interdomain.remote_port;
+ switch ( ld->event_channel[lport].state )
+ {
+ case ECS_INTERDOMAIN:
+ rd = ld->event_channel[lport].u.interdomain.remote_dom;
+ rport = ld->event_channel[lport].u.interdomain.remote_port;
- evtchn_set_pending(rd, rport);
+ evtchn_set_pending(rd, rport);
+ break;
+ case ECS_IPI:
+ rd = ld->exec_domain[ld->event_channel[lport].u.ipi_edom];
+ if ( rd )
+ evtchn_set_pending(rd, lport);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
spin_unlock(&ld->event_channel_lock);
- return 0;
+ return ret;
}